From e3a98e36a287303a5025f14d12bab226f7640e3d Mon Sep 17 00:00:00 2001 From: "awilliam@xenbuild.aw" Date: Tue, 7 Mar 2006 20:01:29 -0700 Subject: [PATCH] [IA64] VTI: fix Oops: time tick before it's due 1. The guest may set itm several times within one execution of the guest's timer handler; the VMM needs to handle this situation. 2. The VMM does not need to stop the guest timer when switching out and reset the guest timer when switching in; this may leave room for some corner case — I have not identified such corner cases yet :-), I just removed this logic. 3. When the VMM emulates writing itv, it cannot simply stop the timer while the guest's interrupt is masked. 4. All operations such as reading/writing itv, itc and itm do not need to disable interrupts, because there is no conflicting access. After all these modifications, a VTI domain no longer complains "Oops: time tick before it's due"; I have not done the full test :-). Signed-off-by: Anthony Xu --- xen/arch/ia64/vmx/vlsapic.c | 92 +++++++++++++++++++++------------ xen/arch/ia64/xen/xenmisc.c | 8 +-- xen/include/asm-ia64/vmx_vcpu.h | 15 +++--- 3 files changed, 68 insertions(+), 47 deletions(-) diff --git a/xen/arch/ia64/vmx/vlsapic.c b/xen/arch/ia64/vmx/vlsapic.c index 79a93018e6..19c135c842 100644 --- a/xen/arch/ia64/vmx/vlsapic.c +++ b/xen/arch/ia64/vmx/vlsapic.c @@ -97,16 +97,15 @@ static void vtm_timer_fn(void *data) { vtime_t *vtm; VCPU *vcpu = data; - u64 cur_itc,vitm; - - UINT64 vec; - - vec = VCPU(vcpu, itv) & 0xff; - vmx_vcpu_pend_interrupt(vcpu, vec); + u64 cur_itc,vitv; + vitv = VCPU(vcpu, itv); + if ( !ITV_IRQ_MASK(vitv) ){ + vmx_vcpu_pend_interrupt(vcpu, vitv & 0xff); + } vtm=&(vcpu->arch.arch_vmx.vtm); cur_itc = now_itc(vtm); - vitm =VCPU(vcpu, itm); + // vitm =VCPU(vcpu, itm); //fire_itc2 = cur_itc; //fire_itm2 = vitm; update_last_itc(vtm,cur_itc); // pseudo read to update vITC @@ -135,51 +134,72 @@ uint64_t vtm_get_itc(VCPU *vcpu) vtime_t *vtm; vtm=&(vcpu->arch.arch_vmx.vtm); - // FIXME: should use local_irq_disable & local_irq_enable ?? 
- local_irq_save(spsr); guest_itc = now_itc(vtm); -// update_last_itc(vtm, guest_itc); - - local_irq_restore(spsr); return guest_itc; } + + + void vtm_set_itc(VCPU *vcpu, uint64_t new_itc) { - uint64_t spsr; + uint64_t vitm, vitv; vtime_t *vtm; - + vitm = VCPU(vcpu,itm); + vitv = VCPU(vcpu,itv); vtm=&(vcpu->arch.arch_vmx.vtm); - local_irq_save(spsr); vtm->vtm_offset = new_itc - ia64_get_itc(); vtm->last_itc = new_itc; - vtm_interruption_update(vcpu, vtm); - local_irq_restore(spsr); + if(vitm < new_itc){ + clear_bit(ITV_VECTOR(vitv), &VCPU(vcpu, irr[0])); + stop_timer(&vtm->vtm_timer); + } } -void vtm_set_itv(VCPU *vcpu) -{ - uint64_t spsr,itv; - vtime_t *vtm; +#define TIMER_SLOP (50*1000) /* ns */ /* copy from timer.c */ +extern u64 cycle_to_ns(u64 cyle); + + +void vtm_set_itm(VCPU *vcpu, uint64_t val) +{ + vtime_t *vtm; + uint64_t vitv, cur_itc, expires; + vitv = VCPU(vcpu, itv); vtm=&(vcpu->arch.arch_vmx.vtm); - local_irq_save(spsr); - itv = VCPU(vcpu, itv); - if ( ITV_IRQ_MASK(itv) ) + // TODO; need to handle VHPI in future + clear_bit(ITV_VECTOR(vitv), &VCPU(vcpu, irr[0])); + VCPU(vcpu,itm)=val; + cur_itc =now_itc(vtm); + if(val > vtm->last_itc){ + expires = NOW() + cycle_to_ns(val-cur_itc) + TIMER_SLOP; + set_timer(&vtm->vtm_timer, expires); + }else{ stop_timer(&vtm->vtm_timer); - vtm_interruption_update(vcpu, vtm); - local_irq_restore(spsr); + } +} + + +void vtm_set_itv(VCPU *vcpu, uint64_t val) +{ + uint64_t olditv; + olditv = VCPU(vcpu, itv); + VCPU(vcpu, itv) = val; + if(ITV_IRQ_MASK(val)){ + clear_bit(ITV_VECTOR(olditv), &VCPU(vcpu, irr[0])); + }else if(ITV_VECTOR(olditv)!=ITV_VECTOR(val)){ + if(test_and_clear_bit(ITV_VECTOR(olditv), &VCPU(vcpu, irr[0]))) + set_bit(ITV_VECTOR(val), &VCPU(vcpu, irr[0])); + } } /* - * Update interrupt or hook the vtm timer for fire + * Update interrupt or hook the vtm timer for fire * At this point vtm_timer should be removed if itv is masked. 
*/ /* Interrupt must be disabled at this point */ - -extern u64 cycle_to_ns(u64 cyle); -#define TIMER_SLOP (50*1000) /* ns */ /* copy from timer.c */ +/* void vtm_interruption_update(VCPU *vcpu, vtime_t* vtm) { uint64_t cur_itc,vitm,vitv; @@ -197,8 +217,7 @@ void vtm_interruption_update(VCPU *vcpu, vtime_t* vtm) cur_itc =now_itc(vtm); diff_last = vtm->last_itc - vitm; diff_now = cur_itc - vitm; - update_last_itc (vtm,cur_itc); - + if ( diff_last >= 0 ) { // interrupt already fired. stop_timer(&vtm->vtm_timer); @@ -207,28 +226,32 @@ void vtm_interruption_update(VCPU *vcpu, vtime_t* vtm) // ITV is fired. vmx_vcpu_pend_interrupt(vcpu, vitv&0xff); } +*/ /* Both last_itc & cur_itc < itm, wait for fire condition */ - else { +/* else { expires = NOW() + cycle_to_ns(0-diff_now) + TIMER_SLOP; set_timer(&vtm->vtm_timer, expires); } local_irq_restore(spsr); } + */ /* * Action for vtm when the domain is scheduled out. * Remove the timer for vtm. */ +/* void vtm_domain_out(VCPU *vcpu) { if(!is_idle_domain(vcpu->domain)) stop_timer(&vcpu->arch.arch_vmx.vtm.vtm_timer); } - + */ /* * Action for vtm when the domain is scheduled in. * Fire vtm IRQ or add the timer for vtm. 
*/ +/* void vtm_domain_in(VCPU *vcpu) { vtime_t *vtm; @@ -238,6 +261,7 @@ void vtm_domain_in(VCPU *vcpu) vtm_interruption_update(vcpu, vtm); } } + */ /* * Next for vLSapic diff --git a/xen/arch/ia64/xen/xenmisc.c b/xen/arch/ia64/xen/xenmisc.c index 70e3986b2e..6c80c61ff2 100644 --- a/xen/arch/ia64/xen/xenmisc.c +++ b/xen/arch/ia64/xen/xenmisc.c @@ -306,9 +306,9 @@ void context_switch(struct vcpu *prev, struct vcpu *next) uint64_t pta; local_irq_save(spsr); - if(VMX_DOMAIN(prev)){ - vtm_domain_out(prev); - } +// if(VMX_DOMAIN(prev)){ +// vtm_domain_out(prev); +// } context_switch_count++; switch_to(prev,next,prev); // if(VMX_DOMAIN(current)){ @@ -326,7 +326,7 @@ if (!i--) { printk("+"); i = 1000000; } } if (VMX_DOMAIN(current)){ - vtm_domain_in(current); +// vtm_domain_in(current); vmx_load_all_rr(current); }else{ extern char ia64_ivt; diff --git a/xen/include/asm-ia64/vmx_vcpu.h b/xen/include/asm-ia64/vmx_vcpu.h index 2f65d32b2d..6c15ceabc4 100644 --- a/xen/include/asm-ia64/vmx_vcpu.h +++ b/xen/include/asm-ia64/vmx_vcpu.h @@ -102,10 +102,11 @@ extern IA64FAULT vmx_vcpu_set_psr_l(VCPU *vcpu, UINT64 val); extern void vtm_init(VCPU *vcpu); extern uint64_t vtm_get_itc(VCPU *vcpu); extern void vtm_set_itc(VCPU *vcpu, uint64_t new_itc); -extern void vtm_set_itv(VCPU *vcpu); +extern void vtm_set_itv(VCPU *vcpu, uint64_t val); +extern void vtm_set_itm(VCPU *vcpu, uint64_t val); extern void vtm_interruption_update(VCPU *vcpu, vtime_t* vtm); -extern void vtm_domain_out(VCPU *vcpu); -extern void vtm_domain_in(VCPU *vcpu); +//extern void vtm_domain_out(VCPU *vcpu); +//extern void vtm_domain_in(VCPU *vcpu); extern void vlsapic_reset(VCPU *vcpu); extern int vmx_check_pending_irq(VCPU *vcpu); extern void guest_write_eoi(VCPU *vcpu); @@ -255,10 +256,7 @@ static inline IA64FAULT vmx_vcpu_set_itm(VCPU *vcpu, u64 val) { - vtime_t *vtm; - vtm=&(vcpu->arch.arch_vmx.vtm); - VCPU(vcpu,itm)=val; - vtm_interruption_update(vcpu, vtm); + vtm_set_itm(vcpu, val); return IA64_NO_FAULT; } static 
inline @@ -299,8 +297,7 @@ IA64FAULT vmx_vcpu_set_itv(VCPU *vcpu, u64 val) { - VCPU(vcpu,itv)=val; - vtm_set_itv(vcpu); + vtm_set_itv(vcpu, val); return IA64_NO_FAULT; } static inline -- 2.30.2